@InProceedings{MatosRamoRomaNasc:2021:MuApAc,
               author = "Matos, Diognei de and Ramos, Washington and Romanhol, Luiz and 
                         Nascimento, Erickson",
          affiliation = "{Universidade Federal de Minas Gerais } and {Universidade Federal 
                         de Minas Gerais } and {Universidade Federal de Minas Gerais } and 
                         {Universidade Federal de Minas Gerais}",
                title = "Musical Hyperlapse: A Multimodal Approach to Accelerate 
                         First-Person Videos",
            booktitle = "Proceedings...",
                 year = "2021",
               editor = "Paiva, Afonso and Menotti, David and Baranoski, Gladimir V. G. and 
                         Proen{\c{c}}a, Hugo Pedro and Junior, Antonio Lopes Apolinario 
                         and Papa, Jo{\~a}o Paulo and Pagliosa, Paulo and dos Santos, 
                         Thiago Oliveira and e S{\'a}, Asla Medeiros and da Silveira, 
                         Thiago Lopes Trugillo and Brazil, Emilio Vital and Ponti, Moacir 
                         A. and Fernandes, Leandro A. F. and Avila, Sandra",
         organization = "Conference on Graphics, Patterns and Images, 34. (SIBGRAPI)",
            publisher = "IEEE Computer Society",
              address = "Los Alamitos",
             keywords = "Hyperlapse, Image Emotion Recognition, Music Emotion 
                         Recognition.",
             abstract = "With the advance of technology and social media usage, the 
                         recording of first-person videos is a widespread habit. These 
                         videos are usually very long and tiring to watch, bringing the 
                         need to speed-up them. Despite recent progress of fast-forward 
                         methods, in general, they do not consider inserting background 
                         music in the videos, which could make them more enjoyable. This 
                         paper presents a new methodology that creates accelerated videos 
                         and includes the background music keeping the same emotion induced 
                         by visual and acoustic modalities. Our methodology is based on the 
                         automatic recognition of emotions induced by music and video 
                         contents and an optimization algorithm that maximizes the visual 
                         quality of the output video and seeks to match the similarity of 
                         the music and the video's emotions. Quantitative results show that 
                         our method achieves the best performance in matching emotion 
                         similarity while also maintaining the visual quality of the 
                         hyperlapse when compared with other literature methods.",
  conference-location = "Gramado, RS, Brazil (virtual)",
      conference-year = "18-22 Oct. 2021",
                  doi = "10.1109/SIBGRAPI54419.2021.00033",
                  url = "http://dx.doi.org/10.1109/SIBGRAPI54419.2021.00033",
             language = "en",
                  ibi = "8JMKD3MGPEW34M/45CS7CS",
                  url = "http://urlib.net/ibi/8JMKD3MGPEW34M/45CS7CS",
           targetfile = "21.pdf",
        urlaccessdate = "2024, May 06"
}
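
A minimal LaTeX usage sketch, assuming this entry is saved in a bibliography
file named refs.bib (a hypothetical filename). Note that BibTeX warns about
the repeated "url" field and ignores the second occurrence; nonstandard
fields such as "ibi" and "conference-location" are skipped by standard
styles.

    \documentclass{article}
    \begin{document}
    % Cite the record by its key, taken verbatim from the entry above.
    Musical Hyperlapse~\cite{MatosRamoRomaNasc:2021:MuApAc} accelerates
    first-person videos while matching the emotion of the background music.
    \bibliographystyle{ieeetr}  % IEEE-style numeric references
    \bibliography{refs}         % refs.bib is an assumed filename
    \end{document}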

